The interface to the typed allocator is now just xmalloc/xmalloc_array/xfree.
_xmalloc/_xmalloc_array are dead (or, at least, no longer part of the public API).
Signed-off-by: keir.fraser@cl.cam.ac.uk
/*
 * Linux-compatibility shims: map Linux locking/CPU/vmalloc APIs onto the
 * local primitives so imported driver code builds unchanged.
 * NOTE(review): this span is a diff hunk; the '-'/'+' prefixed lines are
 * the patch's removal/addition, not literal source text.
 */
/* Mutexes degrade to plain spinlocks in this environment. */
#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
#define down(_m) spin_lock(_m)
#define up(_m) spin_unlock(_m)
/*
 * vmalloc is re-routed through the typed allocator: allocating a u8[_s]
 * array type yields _s bytes; the result is cast back to void * for the
 * untyped vmalloc interface.  NOTE(review): _s is evaluated inside a
 * sizeof via the xmalloc macro — presumably callers pass side-effect-free
 * sizes; confirm against call sites.
 */
-#define vmalloc(_s) _xmalloc(_s)
+#define vmalloc(_s) ((void *)xmalloc(u8[_s]))
#define vfree(_p) xfree(_p)
#define num_online_cpus() smp_num_cpus
static inline int on_each_cpu(
if (ret & 0xff00)
printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
else if (opt.size) {
- rt = _xmalloc(sizeof(struct irq_routing_table) + opt.size);
+ rt = (struct irq_routing_table *)xmalloc(u8[sizeof(struct irq_routing_table) + opt.size]);
if (rt) {
memset(rt, 0, sizeof(struct irq_routing_table));
rt->size = opt.size + sizeof(struct irq_routing_table);
{
m->shadow_dirty_bitmap_size = (p->max_pages + 63) & ~63;
m->shadow_dirty_bitmap =
- _xmalloc(m->shadow_dirty_bitmap_size/8);
+ xmalloc_array(unsigned long, m->shadow_dirty_bitmap_size /
+ (8 * sizeof(unsigned long)));
if ( m->shadow_dirty_bitmap == NULL )
{
m->shadow_dirty_bitmap_size = 0;
{
SH_LOG("Allocate more shadow hashtable blocks.");
- extra = _xmalloc(
- sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
+ extra = (struct shadow_status *)xmalloc(
+ u8[sizeof(void *) + (shadow_ht_extra_size * sizeof(*x))]);
/* XXX Should be more graceful here. */
if ( extra == NULL )
/* NOTE(review): K&R-style empty parameter list — '(void)' would make this
 * a proper prototype; left as-is since this is a context line of a patch. */
extern void dump_slabinfo();
/* Nicely typesafe for you. */
/*
 * xmalloc(_type): allocate one object of '_type', returning typeof(_type) *.
 * typeof() (GCC extension) is used so that array types also work, e.g.
 * xmalloc(u8[n]) allocates n bytes and returns a pointer-to-array type.
 *
 * xmalloc_array(_type, _num): allocate '_num' objects of '_type', with an
 * overflow check on the _num * sizeof(_type) product; evaluates to NULL on
 * overflow (and, presumably, when the underlying _xmalloc fails).
 * NOTE(review): the macro evaluates _num twice — callers must not pass
 * expressions with side effects.  Also the overflow bound is UINT_MAX
 * rather than SIZE_MAX/(~(size_t)0); confirm size_t is not wider than
 * unsigned int on all targets.
 *
 * The '-' lines below are the old implementation (macro pair plus an
 * inline overflow-checking helper) being removed by this patch; the '+'
 * lines are the pure-macro replacement.
 */
-#define xmalloc(type) ((type *)_xmalloc(sizeof(type)))
-#define xmalloc_array(type, num) ((type *)_xmalloc_array(sizeof(type), (num)))
-
-static inline void *_xmalloc_array(size_t size, size_t num)
-{
- /* Check for overflow. */
- if (size && num > UINT_MAX / size)
- return NULL;
- return _xmalloc(size * num);
-}
+#define xmalloc(_type) ((typeof(_type) *)_xmalloc(sizeof(_type)))
+#define xmalloc_array(_type, _num) \
+((_type *)(((_num) > (UINT_MAX / sizeof(_type))) ? \
+ NULL : _xmalloc((_num) * sizeof(_type))))
#endif /* __ARCH_HAS_SLAB_ALLOCATOR */
#endif /* __SLAB_H__ */